SETUP

Import Packages

Import Data

#example grids from package
# bell2010
# mackay1992
# fbb2003

## IMPORT CLEAN PARTICIPANT-LEVEL GRIDS
## One grid (continuous constructs only) per participant.
## importExcel() from the OpenRepGrid package returns an S4 repgrid object.
load_grid <- function(pid) {
  importExcel(file = sprintf("data/participant_grids/simple/%s_clean.xlsx", pid))
}

## researchers
p4  <- load_grid("P04")
p5  <- load_grid("P05")
p6  <- load_grid("P06")
p7  <- load_grid("P07")
p15 <- load_grid("P15")

## designers
p8  <- load_grid("P08")
p9  <- load_grid("P09")
p10 <- load_grid("P10")
p11 <- load_grid("P11")
p12 <- load_grid("P12")
p13 <- load_grid("P13")
p14 <- load_grid("P14")

## IMPORT RAW CODED-DATA DATAFRAME
## row = one participant construct (elements as cols)
df_raw <- read_csv(file = "data/CODED_CONSTRUCTS.csv", na = c("", "NA"))

Wrangle Data

## Participant display names; order MUST match list_all (researchers first).
## NOTE(review): `names` masks base::names() in this session — consider renaming.
names <- c(
  "RESEARCHER P4", "RESEARCHER P5", "RESEARCHER P6", "RESEARCHER P7",
  "RESEARCHER P15",
  "DESIGNER P8", "DESIGNER P9", "DESIGNER P10", "DESIGNER P11",
  "DESIGNER P12", "DESIGNER P13", "DESIGNER P14"
)

## Element (stimulus) labels in presentation order; used as factor levels later.
stimuli <- c(
  "CAREBEAR_BARS", "LADY_LINE", "BULLET_BARS", "CARTOMAP", "MAN_INFO",
  "PENGUIN_DISTS", "HISTO_DIST", "IXN_EBARS", "IXN_SLOPE", "BAYES_RIDGES"
)



## CREATE GROUP-LEVEL GRIDS
## FIX: the researcher grid was previously assigned to `g_designers` and then
## immediately overwritten by the designer grid, so `g_researchers` never existed.
g_researchers <- p4 + p5 + p6 + p7 + p15                  # researchers (P4-P7, P15)
g_designers   <- p8 + p9 + p10 + p11 + p12 + p13 + p14    # designers (P8-P14)

## CREATE MASTER GRID (all 12 participants, researchers first)
g_all <- p4 + p5 + p6 + p7 + p15 + p8 + p9 + p10 + p11 + p12 + p13 + p14

## CREATE LIST OF GRIDS (same order as the `names` vector)
list_all <- list(p4, p5, p6, p7, p15, p8, p9, p10, p11, p12, p13, p14)


## MINIMAL CODED-DATA FRAME, ONLY CONTINUOUS ("NUMERIC") CONSTRUCTS
## One row per participant construct; element ratings remain as columns.
df_coded <- df_raw %>% 
  filter(CONSTRUCT_TYPE == "NUMERIC") %>% 
  mutate(
    PID = as.factor(PID),
    SAMPLE = factor(SAMPLE),
    CONSTRUCT_TYPE = factor(CONSTRUCT_TYPE),
    POLE_LEFT  = factor(POLE_LEFT),
    POLE_RIGHT = factor(POLE_RIGHT),
    POLES = paste0(POLE_LEFT, "-", POLE_RIGHT),        # combined construct label
    FIRST = factor(FIRST),
    SECOND = factor(SECOND),
    THIRD = factor(THIRD),
    CODE_FULL = factor(CODE_STANDARD),
    CODE = factor(paste0(FIRST, "(", SECOND, ")")),
    ## FIX: was `RELFEXIVE = as.logical(REFLEXIVE)` — the typo created a new,
    ## misspelled column instead of converting REFLEXIVE in place.
    REFLEXIVE = as.logical(REFLEXIVE),
    MATCH = as.logical(MATCH)) %>% 
  mutate(
    ## element rating columns arrive as character; coerce to numeric
    across(CAREBEAR_BARS:BAYES_RIDGES, .fns = as.numeric)) %>% 
  select(
    -(CODE_DH:CODE_STANDARD)  # drop raw coding columns (CODE_FULL preserves CODE_STANDARD)
  )

## LONG DATAFRAME
## row = one participant construct X element
df_codedElements <- df_coded %>%
  pivot_longer(
    cols = CAREBEAR_BARS:BAYES_RIDGES,
    names_to = "ELEMENT",
    values_to = "value"   # explicit; same as the pivot_longer() default
  ) %>%
  mutate(
    value   = as.numeric(value),
    POLES   = factor(POLES),
    ELEMENT = factor(ELEMENT, levels = stimuli)  # keep presentation order
  )

#ANALYSIS

PARTICIPANT LEVEL

Construct Clustering

## PRINT CONSTRUCT CLUSTER FOR EACH PARTICIPANT
## One dendrogram per grid; titles come from `names` (same order as list_all).
## https://docs.openrepgrid.org/articles/web/clustering.html
## FIX: replaced the manual `i = 1; ...; i = i + 1` counter with seq_along().
for (i in seq_along(list_all)) {
  cluster(list_all[[i]],
          along = 1,              # 1 = constructs, 2 = elements, 0 = both (default)
          dmethod = "euclidean",  # distance measure TODO evaluate diff options
          cmethod = "ward.D",     # agglomeration method TODO evaluate diff options
          align = TRUE,           # reverse constructs if needed for maximal similarity
          cex = 1, lab.cex = 1, main = names[i])
}

Element Clustering

## PRINT ELEMENT CLUSTER FOR EACH PARTICIPANT
## Same as the construct loop above but clustering along elements (along = 2).
## https://docs.openrepgrid.org/articles/web/clustering.html
## FIX: replaced the manual `i = 1; ...; i = i + 1` counter with seq_along().
for (i in seq_along(list_all)) {
  cluster(list_all[[i]],
          along = 2,              # 1 = constructs, 2 = elements, 0 = both (default)
          dmethod = "euclidean",  # distance measure TODO evaluate diff options
          cmethod = "ward.D",     # agglomeration method TODO evaluate diff options
          align = TRUE,           # reverse constructs if needed for maximal similarity
          cex = 1, lab.cex = 1, main = names[i])
}

Visualize Combined Clustering

## PRINT BERTIN CLUSTER MAP FOR EACH PARTICIPANT
## https://docs.openrepgrid.org/articles/web/visualization-bertin.html
## FIX: replaced the manual `i = 1; ...; i = i + 1` counter with seq_along().
for (i in seq_along(list_all)) {
  print(names[i])  # label the plot that follows in the knitted output

  bertinCluster(list_all[[i]],
                along = 0,              # 1 = constructs, 2 = elements, 0 = both (default)
                dmethod = "euclidean",  # distance measure TODO evaluate diff options
                cmethod = "ward.D",     # agglomeration method TODO evaluate diff options
                align = TRUE,           # reverse constructs if needed for maximal similarity
                type = "rectangle",
                cex = 1, lab.cex = 1,
                trim = 50, draw.axis = FALSE)
}
## [1] "RESEARCHER P4"

## [1] "RESEARCHER P5"

## [1] "RESEARCHER P6"

## [1] "RESEARCHER P7"

## [1] "RESEARCHER P15"

## [1] "DESIGNER P8"

## [1] "DESIGNER P9"

## [1] "DESIGNER P10"

## [1] "DESIGNER P11"

## [1] "DESIGNER P12"

## [1] "DESIGNER P13"

## [1] "DESIGNER P14"

PCA BIPLOTS

## PRINT 2D PCA BIPLOT FOR EACH PARTICIPANT
## https://docs.openrepgrid.org/articles/web/visualization-biplot.html
## FIX: replaced the manual `i = 1; ...; i = i + 1` counter with seq_along().
for (i in seq_along(list_all)) {
  print(names[i])  # label the plot that follows in the knitted output
  biplot2d(list_all[[i]])
  # alternatives considered:
  # biplotSimple(), biplotEsa2d(), biplotSlater2d(), biplotPseudo3d()
}
## [1] "RESEARCHER P4"

## [1] "RESEARCHER P5"

## [1] "RESEARCHER P6"

## [1] "RESEARCHER P7"

## [1] "RESEARCHER P15"

## [1] "DESIGNER P8"

## [1] "DESIGNER P9"

## [1] "DESIGNER P10"

## [1] "DESIGNER P11"

## [1] "DESIGNER P12"

## [1] "DESIGNER P13"

## [1] "DESIGNER P14"

WIP PCA — CONSTRUCTS

###### OPEN REP GRID APPROACH
## Varimax-rotated 2-component PCA of P15's constructs; labels trimmed to 50 chars.
constructPca(
  p15,
  nfactors = 2,
  trim = 50,
  rotate = "varimax",
  method = "pearson"
)
## 
## #################
## PCA of constructs
## #################
## 
## Number of components extracted: 2
## Type of rotation: varimax 
## 
## Loadings:
##                                       RC1   RC2  
## context-dependent - stand-alone        0.59 -0.58
## quick to make - long time to make      0.90  0.18
## sole author - team author              0.87 -0.15
## author no stats backgroun - stats phd  0.08  0.92
## story-first - data-first              -0.17  0.86
## hate (rip up) - love (publish)         0.76 -0.40
## not clever - clever                    0.55 -0.61
## 
##                 RC1  RC2
## SS loadings    2.84 2.51
## Proportion Var 0.41 0.36
## Cumulative Var 0.41 0.76
###### VERSION psych::principal()
## constructPca() above is equivalent to running psych::principal() directly
## on the construct correlation matrix.
corr <- constructCor(p15)

## assign and auto-print the rotated 2-component solution
(p <- principal(corr, nfactors = 2, rotate = "varimax", cor = "cor"))
## Principal Components Analysis
## Call: principal(r = corr, nfactors = 2, rotate = "varimax", cor = "cor")
## Standardized loadings (pattern matrix) based upon correlation matrix
##                           RC1   RC2   h2   u2 com
## context-de - stand-alon  0.59 -0.58 0.69 0.31 2.0
## quick to m - long time   0.90  0.18 0.84 0.16 1.1
## sole autho - team autho  0.87 -0.15 0.78 0.22 1.1
## author no  - stats phd   0.08  0.92 0.85 0.15 1.0
## story-firs - data-first -0.17  0.86 0.77 0.23 1.1
## hate (rip  - love (publ  0.76 -0.40 0.75 0.25 1.5
## not clever - clever      0.55 -0.61 0.68 0.32 2.0
## 
##                        RC1  RC2
## SS loadings           2.84 2.51
## Proportion Var        0.41 0.36
## Cumulative Var        0.41 0.76
## Proportion Explained  0.53 0.47
## Cumulative Proportion 0.53 1.00
## 
## Mean item complexity =  1.4
## Test of the hypothesis that 2 components are sufficient.
## 
## The root mean square of the residuals (RMSR) is  0.11 
## 
## Fit based upon off diagonal values = 0.95
## Explicit re-print of the rotated PCA solution computed above.
print(p)## PREFERRED OUTPUT! 
## Principal Components Analysis
## Call: principal(r = corr, nfactors = 2, rotate = "varimax", cor = "cor")
## Standardized loadings (pattern matrix) based upon correlation matrix
##                           RC1   RC2   h2   u2 com
## context-de - stand-alon  0.59 -0.58 0.69 0.31 2.0
## quick to m - long time   0.90  0.18 0.84 0.16 1.1
## sole autho - team autho  0.87 -0.15 0.78 0.22 1.1
## author no  - stats phd   0.08  0.92 0.85 0.15 1.0
## story-firs - data-first -0.17  0.86 0.77 0.23 1.1
## hate (rip  - love (publ  0.76 -0.40 0.75 0.25 1.5
## not clever - clever      0.55 -0.61 0.68 0.32 2.0
## 
##                        RC1  RC2
## SS loadings           2.84 2.51
## Proportion Var        0.41 0.36
## Cumulative Var        0.41 0.76
## Proportion Explained  0.53 0.47
## Cumulative Proportion 0.53 1.00
## 
## Mean item complexity =  1.4
## Test of the hypothesis that 2 components are sufficient.
## 
## The root mean square of the residuals (RMSR) is  0.11 
## 
## Fit based upon off diagonal values = 0.95
############ NOTE(review): elements become rows here, so the prcomp() below is a
############ PCA ON CONSTRUCTS (constructs are the columns/variables) — confirm.
###### VERSION base::prcomp()
### 1. CREATE TRANSPOSED DF FOR PCA ON CONSTRUCTS
df <- df_coded %>% 
  filter(PID == "P15") %>%
  select(CAREBEAR_BARS:BAYES_RIDGES, POLES)
poles <- df$POLES  # save construct (pole-pair) labels before transposing
## transpose; FIX: `.name_repair = "minimal"` silences the tibble lifecycle
## warning about non-unique column names (names are overwritten just below).
df <- t(df) %>% as_tibble(.name_repair = "minimal")
## Warning: The `x` argument of `as_tibble.matrix()` must have unique column names if
## `.name_repair` is omitted as of tibble 2.0.0.
## ℹ Using compatibility `.name_repair`.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
colnames(df) <- poles  # FIX: use `<-` for assignment (was `=`)
## drop the trailing POLES row that came along in the transpose
df <- df[seq_along(stimuli), ]  # FIX: seq_along() instead of 1:length()
## FIX: mutate_all() is superseded; use across(everything(), ...)
df <- df %>% mutate(across(everything(), as.numeric))
df <- df %>% mutate(element = factor(stimuli))
dpca <- df %>% select(where(is.numeric))  # just the numeric construct columns

### 2. RUN PCA ON THE NUMERIC CONSTRUCT COLUMNS
pca <- prcomp(dpca, scale = TRUE)
summary(pca)
## Importance of components:
##                           PC1    PC2    PC3     PC4     PC5     PC6     PC7
## Standard deviation     1.9293 1.2776 0.9003 0.67565 0.44771 0.37758 0.18806
## Proportion of Variance 0.5318 0.2332 0.1158 0.06521 0.02864 0.02037 0.00505
## Cumulative Proportion  0.5318 0.7649 0.8807 0.94595 0.97458 0.99495 1.00000
# data plot: elements positioned in PC1/PC2 space, colored and labelled by element
# (autoplot method for prcomp — presumably from ggfortify; TODO confirm package)
autoplot(pca, data = df, color = "element", label=TRUE, label.size=5) + theme_minimal()

# biplot: elements (points) plus construct loadings (arrows)
biplot(pca)

# scree plot: variance per component, to pick the number of components to keep
plot(pca, type="lines")

WIP PCA ELEMENTS

!! TODO GROUP LEVEL

!! WIP LMER

# df <- df_codedElements %>% 
#   select(
#     value, ELEMENT,CODE,POLES,PID,SAMPLE
#   )
# 
# 
# 
# m0 <- lmer(value ~ (1|PID),  data = df )
# summary(m0)
# 
# levels(df$ELEMENT)
# m1 <- lmer(value ~ ELEMENT*CODE + SAMPLE + (1|PID), data = df)
# summary(m1)
# plot_model(m1,type="pred", terms=c("CODE"))

FUTURE AMY TODO

Conflicts

## Conflict/inconsistency measure based on triangle inequalities (Bell, 2004).
## TODO(review): clarify how 'conflict' should be interpreted for this study; see
# https://docs.openrepgrid.org/articles/web/measures-conflict.html
#Bell, R. C. (2004). A new approach to measuring inconsistency or conflict in grids. Personal Construct Theory & Practice, 1, 53–59.
#Heider, F. (1946). Attitudes and cognitive organization. Journal of Psychology, 21, 107–112.

indexConflict3(p4)
## 
## ##########################################################
## CONFLICT OR INCONSISTENCIES BASED ON TRIANGLE INEQUALITIES
## ##########################################################
## 
## Potential conflicts in grid:  150
## Actual conflicts in grid:  66
## Overall percentage of conflict in grid:  44 %
## 
## ELEMENTS
## ########
## 
## Percent of conflict attributable to element:
## 
##                      percentage
## 1 care-bear             1666.67
## 2 diamond-lady          1363.64
## 3 bullets               1212.12
## 4 heatmap                454.55
## 5 taxes                  151.52
## 6 penguins               606.06
## 7 physics-matplotlib    1666.67
## 8 interaction            909.09
## 9 slope-interaction      909.09
## 10 bayesian             1060.61
## 
## Chi-square test of equal count of conflicts for elements.
## 
##  Chi-squared test for given probabilities
## 
## data:  x$e.count
## X-squared = 14.909, df = 9, p-value = 0.09346
## 
## 
## CONSTRUCTS
## ##########
## 
## Percent of conflict attributable to construct:
## 
##                           percentage
## 1 not biased - biased          16.67
## 2 dry - sensationa             14.39
## 3 don't trus - trust the       12.88
## 4 don't beli - believe au      17.42
## 5 audience e - high edu l      14.39
## 6 stop - scroll                24.24
## 
## Chi-square test of equal count of conflicts for constructs.
## 
##  Chi-squared test for given probabilities
## 
## data:  x$c.count
## X-squared = 6.5455, df = 5, p-value = 0.2567

Implicative Dilemma

# https://docs.openrepgrid.org/articles/web/measures-implicative.html

# Implicative dilemmas are closely related to the notion of conflict. An implicative dilemma arises when a desired change on one construct is associated with an undesired implication on another construct. E. g. a timid subject may want to become more socially skilled but associates being socially skilled with different negative characteristics (selfish, insensitive etc.). Hence, he may anticipate that becoming less timid will also make him more selfish (cf. Winter, 1982). 

## FIX: the call had a dangling empty argument (`self=,`); make the package
## default (element 1 = self) explicit instead.
## TODO(review): 1. decide if this measure is useful; 2. understand what it computes;
## 3. confirm which elements represent "self" vs "ideal self" and align poles.
i <- indexDilemma(p15, self = 1, ideal = 10)
plot(i)

SCRATCH

Create Grid from dataframe

# # CREATE a custom grid from the coded constructs dataframe
# 
# ######## FILTER MASTER DATAFRAME 
# d <- df %>% 
#   filter(
#       PID=="P15",
#       CONSTRUCT_TYPE=="NUMERIC"
#       ) %>% 
#   mutate_at(vars(CAREBEAR_BARS:BAYES_RIDGES), as.numeric) %>% 
#   mutate(
#     COUPLED = paste0(POLE_LEFT,"-",POLE_RIGHT),
#     CONSTRUCT = paste0(FIRST,"(",SECOND,")")
#   ) %>% select (
#     POLE_LEFT, POLE_RIGHT,
#     COUPLED,
#     CONSTRUCT,
#     CAREBEAR_BARS:BAYES_RIDGES)
#   # ) %>% column_to_rownames(var = "CONSTRUCT")
#   # ) %>% column_to_rownames(var = "CODE_STANDARD")
# ########### 
# 
# ## elements
# e <- d %>% select(-(POLE_LEFT:CONSTRUCT)) %>% colnames()
# # e <- c("care-bear","diamond-lady","bullets","heatmap","taxes",
#        # "penguins","physics-matplotlib","interaction","slope-interaction","bayesian")
# ## construct left pole
# l <- d %>% pull(POLE_LEFT)
# ## construct right pole
# r <- d %>% pull(POLE_RIGHT)
# ## construct code
# c <- d %>% pull(CONSTRUCT)
# 
# ## ratings
# ## have to unravel dataframe by row; unlist goes by column, 
# ## so instead, first transpose, then use 
# s <- c(t(d %>% select(CAREBEAR_BARS:BAYES_RIDGES)))
# 
# ## ASSEMBLE NEW REPGRID OBJECT
# ## args 
# args <- list(
#   name = e,
#   l.name = c,
#   # r.name = r,
#   coupled =F,
#   scores = s
#   ) 
# t15 <- makeRepgrid(args)
# t15 <- setScale(t15, 1, 5)
# t15
# 
# 
# g_double <- t15+t15

Construct Description (RAW)

## Descriptive statistics per construct for P15; trim construct labels to 50 chars.
statsConstructs(p15, trim = 50)
## 
## ####################################
## Desriptive statistics for constructs
## ####################################
## 
##                                           vars  n mean   sd median trimmed  mad
## (1) context-dependent - stand-alone          1 10 3.35 1.73   4.00    3.44 1.48
## (2) quick to make - long time to make        2 10 3.00 1.68   3.00    3.00 2.59
## (3) sole author - team author                3 10 2.50 1.78   1.50    2.38 0.74
## (4) author no stats backgroun - stats phd    4 10 3.80 1.21   4.00    4.00 1.11
## (5) story-first - data-first                 5 10 3.30 1.69   4.50    3.38 0.37
## (6) hate (rip up) - love (publish)           6 10 3.15 1.06   3.50    3.12 1.11
## (7) not clever - clever                      7 10 2.80 1.44   2.75    2.81 1.85
##                                           min max range  skew kurtosis   se
## (1) context-dependent - stand-alone         1 5.0   4.0 -0.45    -1.72 0.55
## (2) quick to make - long time to make       1 5.0   4.0 -0.05    -1.86 0.53
## (3) sole author - team author               1 5.0   4.0  0.37    -1.87 0.56
## (4) author no stats backgroun - stats phd   1 5.0   4.0 -1.07     0.16 0.38
## (5) story-first - data-first                1 5.0   4.0 -0.39    -1.91 0.53
## (6) hate (rip up) - love (publish)          2 4.5   2.5 -0.10    -2.00 0.33
## (7) not clever - clever                     1 4.5   3.5 -0.04    -1.93 0.45
## ARF TODO reformat as df to get ridgeplot of histograms w/ constructs as rows?

Construct Correlations (RAW)

## Pairwise Pearson correlations between P15's constructs.
constructCor(p15)
## 
## ##############################
## Correlation between constructs
## ##############################
## 
## Type of correlation:  pearson 
## 
##                             1     2     3     4     5     6     7
## context-de - stand-alon 1      0.32  0.57 -0.44 -0.50  0.73  0.54
## quick to m - long time  2            0.82  0.14 -0.11  0.50  0.29
## sole autho - team autho 3                 -0.16 -0.39  0.52  0.43
## author no  - stats phd  4                        0.77 -0.24 -0.41
## story-firs - data-first 5                             -0.31 -0.52
## hate (rip  - love (publ 6                                    0.83
## not clever - clever     7

Describe Elements

# Descriptive statistics (mean, sd, median, range, skew, ...) per element of P15's grid.
statsElements(p15)
## 
## ##################################
## Desriptive statistics for elements
## ##################################
## 
##                        vars n mean   sd median trimmed  mad min max range  skew
## (1) care-bear             1 7 2.50 1.89      1    2.50 0.00 1.0 5.0   4.0  0.28
## (2) diamond-lady          2 7 3.79 1.41      4    3.79 1.48 1.0 5.0   4.0 -0.92
## (3) bullets               3 7 3.71 1.15      4    3.71 0.74 1.5 5.0   3.5 -0.80
## (4) heatmap               4 7 2.86 1.11      3    2.86 1.48 1.5 4.5   3.0  0.23
## (5) taxes                 5 7 4.64 0.48      5    4.64 0.00 4.0 5.0   1.0 -0.47
## (6) penguins              6 7 3.71 1.22      4    3.71 0.74 2.0 5.0   3.0 -0.55
## (7) physics-matplotlib    7 7 2.07 1.54      1    2.07 0.00 1.0 4.5   3.5  0.65
## (8) interaction           8 7 2.43 1.48      2    2.43 1.48 1.0 4.5   3.5  0.51
## (9) slope-interaction     9 7 2.57 1.69      2    2.57 1.48 1.0 4.5   3.5  0.16
## (10) bayesian            10 7 3.00 1.55      3    3.00 2.22 1.0 5.0   4.0 -0.17
##                        kurtosis   se
## (1) care-bear             -2.11 0.72
## (2) diamond-lady          -0.64 0.53
## (3) bullets               -0.76 0.43
## (4) heatmap               -1.72 0.42
## (5) taxes                 -1.86 0.18
## (6) penguins              -1.66 0.46
## (7) physics-matplotlib    -1.63 0.58
## (8) interaction           -1.67 0.56
## (9) slope-interaction     -2.13 0.64
## (10) bayesian             -1.70 0.59

Element Correlations

## Pairwise correlations between P15's elements (Cohen's rc, invariant to scale reflection).
elementCor(p15)
## 
## ############################
## Correlation between elements
## ############################
## 
## Type of correlation:  Cohens's rc (invariant to scale reflection) 
## 
##                             1     2     3     4     5     6     7     8     9
## (1) care-bear           1      0.08  0.38 -0.47 -0.32 -0.38 -0.23 -0.34  0.00
## (2) diamond-lady        2            0.77  0.41  0.52 -0.04 -0.94 -0.88 -0.72
## (3) bullets             3                 -0.08  0.46  0.02 -0.74 -0.69 -0.33
## (4) heatmap             4                        0.00  0.03 -0.20 -0.23 -0.21
## (5) taxes               5                              0.53 -0.57 -0.45 -0.26
## (6) penguins            6                                    0.14  0.27  0.41
## (7) physics-matplotlib  7                                          0.95  0.76
## (8) interaction         8                                                0.68
## (9) slope-interaction   9                                                    
## (10) bayesian          10                                                    
##                           10
## (1) care-bear          -0.34
## (2) diamond-lady       -0.62
## (3) bullets            -0.43
## (4) heatmap            -0.41
## (5) taxes              -0.15
## (6) penguins            0.43
## (7) physics-matplotlib  0.66
## (8) interaction         0.85
## (9) slope-interaction   0.38
## (10) bayesian